library(tidyverse)
library(collapse)
library(onezero)
# Good news: the larger nrow gets, the better qtab() seems to perform
# relative to the fmean() approach.
# items only (drop id and weight), resampled to 1e6 rows to stress-test scaling
item.df <- FoodSample %>%
  select(-c(id, weight)) %>%
  slice_sample(n = 1e6, replace = TRUE)
# factor copies of the items, with level order c(1, 0) so the "both 1"
# cell lands in position [1, 1] of the qtab() output
item.df2 <- mutate(
  item.df,
  across(
    .cols = everything(),
    .fns = ~ factor(.x, levels = c(1, 0))
  )
)
wgt.vec <- rep(1, times = nrow(item.df))  # unit weights
M <- ncol(item.df)                        # number of items
# results matrix: the lower triangle will hold the weighted proportion of
# rows where both items equal 1
out <- matrix(
  data = NA, nrow = M, ncol = M,
  dimnames = list(names(item.df), names(item.df))
)
f <- function() {
  for (i in 1:M) {
    for (j in 1:M) {
      # only fill the lower triangle; skip the diagonal and upper triangle
      if (i <= j) next
      # logical vector: TRUE where items i and j are both 1
      condition.met <- (item.df[[i]] + item.df[[j]]) == 2
      # weighted mean of that condition
      out[i, j] <- fmean(
        x = condition.met,
        w = wgt.vec,
        na.rm = TRUE
      )
    }
  }
  out
}
g <- function() {
  for (i in 1:M) {
    for (j in 1:M) {
      # only fill the lower triangle; skip the diagonal and upper triangle
      if (i <= j) next
      # weighted 2x2 cross-tab of items i and j; with levels c(1, 0),
      # cell [1, 1] counts rows where both items are 1
      foo <- qtab(
        item.df2[[i]],
        item.df2[[j]],
        w = wgt.vec
      )
      out[i, j] <- foo[1, 1] / sum(foo)
    }
  }
  out
}
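# Quick consistency check (not part of the original benchmark): assuming
# fmean()'s na.rm = TRUE and qtab()'s default NA exclusion drop the same
# rows, the two implementations should agree.
all.equal(f(), g())  # expect TRUE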
# compare the two approaches over 50 runs, reporting times in seconds
microbenchmark::microbenchmark(
  f(), g(),
  times = 50, unit = "s"
)
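# For comparison, a fully vectorized sketch (not in the original script):
# assuming the items are strictly 0/1 with no missing values, the weighted
# count of rows where items i and j are both 1 is t(X) %*% (w * X), so every
# pairwise proportion comes from one matrix product. h() is an illustration
# under that assumption, not a drop-in replacement if the data contain NAs.
h <- function() {
  X <- as.matrix(item.df)                   # N x M matrix of 0/1 indicators
  both1 <- crossprod(X, wgt.vec * X)        # weighted counts of joint 1s
  prop <- both1 / sum(wgt.vec)              # proportion of total weight
  prop[upper.tri(prop, diag = TRUE)] <- NA  # keep only the lower triangle
  prop
}
# e.g. microbenchmark::microbenchmark(f(), g(), h(), times = 50, unit = "s")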